DEFINE(IA64_PT_REGS_R6_OFFSET, offsetof (struct pt_regs, r6));
DEFINE(IA64_PT_REGS_R7_OFFSET, offsetof (struct pt_regs, r7));
DEFINE(IA64_PT_REGS_EML_UNAT_OFFSET, offsetof (struct pt_regs, eml_unat));
- DEFINE(IA64_PT_REGS_RFI_PFS_OFFSET, offsetof (struct pt_regs, rfi_pfs));
DEFINE(IA64_VCPU_IIPA_OFFSET, offsetof (struct vcpu, arch.arch_vmx.cr_iipa));
DEFINE(IA64_VCPU_ISR_OFFSET, offsetof (struct vcpu, arch.arch_vmx.cr_isr));
DEFINE(IA64_VCPU_CAUSE_OFFSET, offsetof (struct vcpu, arch.arch_vmx.cause));
BLANK();
DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct vcpu, arch.privregs));
+ DEFINE(IA64_VPD_VIFS_OFFSET, offsetof (mapped_regs_t, ifs));
DEFINE(IA64_VLSAPIC_INSVC_BASE_OFFSET, offsetof (struct vcpu, arch.insvc[0]));
DEFINE(IA64_VPD_CR_VPTA_OFFSET, offsetof (cr_t, pta));
DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t));
ld8 r19=[r16],PT(R3)-PT(AR_FPSR) //load ar_fpsr
ld8.fill r2=[r17],PT(AR_CCV)-PT(R2) //load r2
;;
- ld8.fill r3=[r16] //load r3
- ld8 r18=[r17],PT(RFI_PFS)-PT(AR_CCV) //load ar_ccv
+ ld8.fill r3=[r16] //load r3
+ ld8 r18=[r17] //load ar_ccv
;;
mov ar.fpsr=r19
mov ar.ccv=r18
;;
mov ar.bspstore=r24
;;
- ld8 r24=[r17] //load rfi_pfs
mov ar.unat=r28
mov ar.rnat=r25
mov ar.rsc=r26
mov cr.ipsr=r31
mov cr.iip=r30
mov cr.ifs=r29
- cmp.ne p6,p0=r24,r0
-(p6)br.sptk vmx_dorfirfi
- ;;
-vmx_dorfirfi_back:
mov ar.pfs=r27
adds r18=IA64_VPD_BASE_OFFSET,r21
;;
adds r19=VPD(VPSR),r18
;;
ld8 r19=[r19] //vpsr
-//vsa_sync_write_start
movl r20=__vsa_base
;;
+//vsa_sync_write_start
ld8 r20=[r20] // read entry point
mov r25=r18
;;
+ movl r24=ia64_vmm_entry // calculate return address
add r16=PAL_VPS_SYNC_WRITE,r20
- movl r24=switch_rr7 // calculate return address
;;
mov b0=r16
br.cond.sptk b0 // call the service
;;
END(ia64_leave_hypervisor)
-switch_rr7:
// fall through
GLOBAL_ENTRY(ia64_vmm_entry)
/*
br.cond.sptk b0 // call pal service
END(ia64_vmm_entry)
-//r24 rfi_pfs
-//r17 address of rfi_pfs
-GLOBAL_ENTRY(vmx_dorfirfi)
- mov r16=ar.ec
- movl r20 = vmx_dorfirfi_back
- ;;
-// clean rfi_pfs
- st8 [r17]=r0
- mov b0=r20
-// pfs.pec=ar.ec
- dep r24 = r16, r24, 52, 6
- ;;
- mov ar.pfs=r24
- ;;
- br.ret.sptk b0
- ;;
-END(vmx_dorfirfi)
#ifdef XEN_DBL_MAPPING /* will be removed */
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/vhpt.h>
+#include <asm/virt_event.h>
#ifdef VTI_DEBUG
/*
st8 [r16] = r24
st8 [r17] = r25
;;
+ cmp.ne p6,p0=EVENT_RFI, r24
+ (p6) br.sptk vmx_dispatch_virtualization_fault
+ ;;
+ adds r18=IA64_VPD_BASE_OFFSET,r21
+ ;;
+ ld8 r18=[r18]
+ ;;
+ adds r18=IA64_VPD_VIFS_OFFSET,r18
+ ;;
+ ld8 r18=[r18]
+ ;;
+ tbit.z p6,p0=r18,63
+ (p6) br.sptk vmx_dispatch_virtualization_fault
+ ;;
+ // if vifs.v == 1, discard the current register frame (alloc a zero-size frame below)
+ alloc r18=ar.pfs,0,0,0,0
br.sptk vmx_dispatch_virtualization_fault
END(vmx_virtualization_fault)
vcpu_bsw1(vcpu);
vmx_vcpu_set_psr(vcpu,psr);
ifs=VCPU(vcpu,ifs);
- if((ifs>>63)&&(ifs<<1)){
- ifs=(regs->cr_ifs)&0x7f;
- regs->rfi_pfs = (ifs<<7)|ifs;
- regs->cr_ifs = VCPU(vcpu,ifs);
- }
+ if(ifs>>63)
+ regs->cr_ifs = ifs;
regs->cr_iip = VCPU(vcpu,iip);
return (IA64_NO_FAULT);
}
unsigned long r6; /* preserved */
unsigned long r7; /* preserved */
unsigned long eml_unat; /* used for emulating instruction */
- unsigned long rfi_pfs; /* used for elulating rfi */
+ unsigned long pad0; /* alignment pad */
};
typedef struct cpu_user_regs cpu_user_regs_t;